Testing the different components of the envelope

Motion Clouds: testing components of the envelope

In []:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output
In [46]:
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
  Hide output
In [47]:
def show_mc(name):
    """Display the material generated for a Motion Cloud in the notebook.

    Shows, when the corresponding files exist, the static figure and the
    'cube' visualization, then embeds the movie as a base64-encoded HTML5
    ``<video>`` tag.

    Parameters
    ----------
    name : str
        Base file name (path + stem, no extension); ``mc.ext`` and
        ``mc.vext`` give the image and video extensions.

    Raises
    ------
    IOError
        If the video file ``name + mc.vext`` does not exist (same as the
        original behavior).
    """
    # consolidate the imports (the original scattered two separate
    # `from IPython.display import ...` statements through the body)
    import os
    from base64 import b64encode
    from IPython.display import display, Image, HTML
    # the static figures are optional: only display the ones that exist
    if os.path.isfile(name + mc.ext):
        display(Image(filename=name + mc.ext))
    if os.path.isfile(name + '_cube' + mc.ext):
        display(Image(filename=name + '_cube' + mc.ext))
    # use a context manager so the file handle is closed deterministically
    # (the original `open(...).read()` leaked the handle)
    with open(name + mc.vext, "rb") as video_file:
        video = video_file.read()
    video_encoded = b64encode(video)
    video_tag = '<video controls  autoplay="autoplay" loop="loop" width=50% src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
  Hide output

Testing the color

In [48]:
name = 'color'
# colored-noise envelope: no speed/orientation/frequency selectivity applied yet
z = mc.envelope_color(fx, fy, ft)
# NOTE(review): anim_exist apparently returns True when the animation is
# *missing* (used here as a "generate only once" guard) -- TODO confirm
if mc.anim_exist(mc.figpath + name): mc.figures(z, mc.figpath + name)
show_mc(mc.figpath + name)
  Hide output
In [48]:
 
  Hide output
In [48]:
 
  Hide output
In [48]:
 
  Hide output
In [49]:
# explore parameters
for alpha in [0.0, 0.5, 1.0, 1.5, 2.0]:
    # resp. white(0), pink(1), red(2) or brownian noise (see http://en.wikipedia.org/wiki/1/f_noise
    name_ = mc.figpath + name + '-alpha-' + str(alpha).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, alpha)
        mc.figures(z, name_)
    show_mc(name_)
  Hide output
In [50]:
# explore ft_0, the time/space scaling of the envelope
for ft_0 in [0.125, 0.25, 0.5, 1., 2., 4.]:# time space scaling
    name_ = mc.figpath + name + '-ft_0-' + str(ft_0).replace('.', '_')
    # only (re)generate when the animation is not already on disk -- TODO confirm anim_exist semantics
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
  Hide output
In [51]:
# explore the contrast used when rectifying the raw cloud into an image sequence
for contrast in [0.1, 0.25, 0.5, 0.75, 1.0, 2.0]:
    name_ = mc.figpath + name + '-contrast-' + str(contrast).replace('.', '_')
    if mc.anim_exist(name_):
        # random_cloud: draw one texture from the envelope; rectif: map it to [0, 1] at the given contrast
        im = mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft)), contrast)
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
  Hide output
In [52]:
# same contrast exploration, but with the alternative 'energy' rectification method
for contrast in [0.1, 0.25, 0.5, 0.75, 1.0, 2.0]:
    name_ = mc.figpath + name + '-energy_contrast-' + str(contrast).replace('.', '_')
    if mc.anim_exist(name_):
        im = mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft)), contrast, method='energy')
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
  Hide output
In [53]:
# different random seeds draw different textures from the *same* envelope
for seed in [123456 + step for step in range(7)]:
    name_ = mc.figpath + name + '-seed-' + str(seed)
    if mc.anim_exist(name_):
        mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), name_, display=False)
    show_mc(name_)
  Hide output
In [54]:
# explore the overall grid size (space and time scaled together: 2**5, 2**6)
# NOTE(review): this cell rebinds the globals N_X, N_Y, N_frame, fx, fy, ft --
# later cells that reuse these names silently depend on this cell having run
for size in range(5, 7):
    N_X, N_Y, N_frame = 2**size, 2**size, 2**size
    fx, fy, ft = mc.get_grids(N_X, N_Y, N_frame)
    # keep the same space/time scaling whatever the grid size
    ft_0 = N_X/float(N_frame)
    name_ = mc.figpath + name + '-size-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
  Hide output
In [55]:
# explore the temporal size only (space stays at the default mc.N_X x mc.N_Y)
for size in range(5, 7):
    N_frame = 2**size
    fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, N_frame)
    # BUG FIX: the scaling must use mc.N_X, the spatial size actually passed
    # to get_grids just above; the bare `N_X` used originally leaked from the
    # previous cell, so this cell only worked when run in order after it.
    ft_0 = mc.N_X/float(N_frame)
    name_ = mc.figpath + name + '-size_T-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_, do_figs=False)
    show_mc(name_)
  Hide output
In [56]:
#name = 'colorfull'
#N = 256 #512
#fx, fy, ft = mc.get_grids(N, N, N)
#for seed in [123456 + step for step in range(1)]:
#    if mc.anim_exist(mc.figpath + name):
#        mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False)
#        mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False, vext='.mat')
  Hide output

Testing the speed

In []:
 
  Hide output

Testing competing Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds in opposite directions.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

In []:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output

The experiment

In [1]:
#%pycat psychopy_competing.py
  Hide output

Analysis, version 1

In a first version of the experiment, we only stored the results in a numpy file:

In [2]:
!ls  data/competing_v1_*npy
  Hide output
data/competing_v1_bruno_Dec_14_1210.npy  data/competing_v1_lup_Dec_12_1003.npy	data/competing_v1_lup_Dec_12_1013.npy  data/competing_v1_lup_Dec_14_1201.npy  data/competing_v1_meduz_Dec_14_1204.npy

In [3]:
import glob
fig = plt.figure()
ax = fig.add_subplot(111)
# one scatter per session file; per the axis labels below,
# results[1, :] holds the contrast and results[0, :] the perceived direction
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    print 'experiment ', fn, ', # trials=', results.shape[1]
    ax.scatter(results[1, :], results[0, :])
# `_ =` suppresses the matplotlib reprs in the notebook output
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
  Hide output
experiment  data/competing_v1_bruno_Dec_14_1210.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1003.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1013.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_14_1201.npy , # trials= 10
experiment  data/competing_v1_meduz_Dec_14_1204.npy , # trials= 50

In [4]:
# Recode each session into (contrast in [.5, 1], correct in {0, 1}) pairs and
# visualize the raw responses.  Row 0 of `results` is the perceived direction
# (+1/-1) and row 1 the contrast (see the scatter cell above).
# Trials with contrast < .5 mean the *lower* component was stronger; trials
# with contrast > .5 mean the *upper* one was.  Note a trial at exactly .5
# falls only in the >= .5 branch of the recoding, so every column of `data_`
# gets written.
alpha = .3
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
data = []
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    # same shape as `results`: row 0 = folded contrast, row 1 = correctness
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==-1), 
                c='r', alpha=alpha)
    # copying data: contrast folded from [0, .5) onto (.5, 1], correctness flag
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==-1), 
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==1), 
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==1), 
                c='r', alpha=alpha, marker='x')
    # copying data: contrast already in [.5, 1], correctness flag
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)

# shared cosmetics for both panels
for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
  Hide output

(note the subplots are equivalent up to a flip)

One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/

In [30]:
# Per-session proportion correct, binned by (folded) contrast in [.5, 1].
# NOTE(review): `normed=False` is deprecated in newer numpy (use `density`);
# fine under the environment this notebook targets -- TODO confirm version.
# NOTE(review): bins with hist_total == 0 yield a 0/0 division (NaN bar).
n_hist = 10
C_bins = np.linspace(0.5, 1., n_hist)
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
for data_ in data:
    # counts of correct trials per contrast bin (row 1 == 1 marks correct)
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    # counts of all trials per contrast bin
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    # bar at each bin center; `1.*` forces float division under Python 2
    plt.bar(bins[:-1] + (bins[1]-bins[0])/2, (1.*hist_correct)/(1.*hist_total), width=.95*(bins[1]-bins[0]), alpha=.1)
_ = ax.axis([0.5, 1., 0., 1.1])
_ = ax.set_xlabel('contrast')
  Hide output

let's explore another way:

In [31]:
import pypsignifit as psi
  Hide output

data : an array or a list of lists containing stimulus intensities in the first column, number of correct responses (nAFC) or number of YES- responses in the second column, and number of trials in the third column. Each row should correspond to one experimental block. In addition, the sequence of the rows is taken as the sequence of data aquisition. Alternatively, the relative frequencies of correct responses resp YES responses can be given.

In [32]:
# Build the data matrix expected by pypsignifit: per block, the stimulus
# intensity, the *relative frequency* of correct responses (allowed as an
# alternative to raw counts, per the pypsignifit docs quoted above), and the
# number of trials.
# NOTE(review): the trial count is hard-coded to 2 per block, which does not
# match the actual per-bin counts -- TODO confirm this is intended.
# NOTE(review): bins with no trials produce NaN frequencies (0/0) here too.
stimulus_intensities, percent_correct = np.array([]), np.array([])
for data_ in data:
    #print data_.shape
    #stimulus_intensities = np.hstack((stimulus_intensities, data_[0, : ]))
    #percent_correct = np.hstack((percent_correct, data_[1, : ]))
    #print data_[0, : ].min(), data_[0, : ].max(), data_[1, : ].mean()
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    # one "block" per contrast bin, located at the bin center
    stimulus_intensities = np.hstack((stimulus_intensities, bins[:-1] + (bins[1]-bins[0])/2))
    percent_correct = np.hstack((percent_correct, (1.*hist_correct)/(1.*hist_total)))

num_observations = len(stimulus_intensities)
print 'num_observations: ', num_observations
# rows: intensity, relative frequency correct, number of trials
psidata = np.vstack((stimulus_intensities, percent_correct, np.array([2]*num_observations)))
  Hide output
num_observations:  45

BootstrapInference

In [34]:
# see http://psignifit.sourceforge.net/DESIGN.html
nafc = 2
constraints = ( 'unconstrained', 'unconstrained', 'Beta(2,20)')
B = psi.BootstrapInference ( psidata, priors=constraints, nafc=nafc )
# (threshold, slope, lapse rate)
print B.estimate
  Hide output
[ 0.49862663  0.03601899  0.04546066]

How well do these parameters describe the data? The deviance is a measure that describes the goodness of fit for a model, based on the sum of the squares error metric:

In [35]:
print B.deviance, B.getThres()#, B.getCI(1)
  Hide output
0.186105675197 0.498626633193

In [36]:
_ = psi.psigniplot.plotPMF(B, xlabel_text='contrast', showaxes=True)
  Hide output
In [37]:
B.sample()
  Hide output
In [38]:
psi.GoodnessOfFit(B)
  Hide output
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-38-730472e77ec6> in <module>()
----> 1 psi.GoodnessOfFit(B)

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in GoodnessOfFit(InferenceObject, warn)
    575     if infer in ["BayesInference","ASIRInference"]:
    576         InferenceObject.drawposteriorexamples ( ax=ax_plot )
--> 577     plotThres ( InferenceObject, ax=ax_plot )
    578     plotPMF   ( InferenceObject, ax=ax_plot, showdesc=True )
    579     if infer in ["BayesInference","ASIRInference"]:

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in plotThres(InferenceObject, ax, color)
    480 
    481     for k,cut in enumerate(InferenceObject.cuts):
--> 482         c25,c975 = InferenceObject.getCI ( cut=k, conf=(.025,.975) )
    483         thres = InferenceObject.getThres ( cut )
    484         ylev = InferenceObject.evaluate ( [thres] )

/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.pyc in getCI(self, cut, conf, thres_or_slope)
    452                 vals.append(stats.norm.cdf( bias + ( stats.norm.ppf(pp) + bias ) / (1-acc*(stats.norm.ppf(pp) + bias )) ))
    453 
--> 454             return p.prctile ( self.__bthres[:,cut], 100*N.array(vals) )
    455         elif thres_or_slope[0] == "s":
    456             bias = self.__sl_bias[cut]

/usr/local/lib/python2.7/site-packages/matplotlib/mlab.pyc in prctile(x, p)
    953         frac[cond] += 1
    954 
--> 955     return _interpolate(values[ai],values[bi],frac)
    956 
    957 def prctile_rank(x, p):

IndexError: index -9223372036854775808 is out of bounds for size 2000
  • Top left: displays the given data points and the fitted psychometric function. Thresholds and confidence intervals are plotted at three levels (default: 25%, 50% and 75% ). Mind that the y-axis starts at 0.5 (the guess rate in a 2AFC task), therefore the 50% threshold is located at . signifies the deviance value.
  • Bottom left: histogram of bootstrapped deviance values (default = 2000 samples). The 95% confidence limits are indicated by red dotted lines and the actually observed deviance is indicated by the solid red line. If the observed deviance is outside the 95% confidence limits that indicates a bad fit and you will receive a warning message.
  • Top middle: deviance residuals are plotted as a function of the predicted correct response rate of the model (x-axis corresponds to y-axis in panel 1). This plot helps you to detect systematic deviations between the model and the data. The dotted line is the best linear fit that relates deviance residuals to the predicted correct response rate. Rpd gives the numerical value of that correlation. Note that the residuals are scaled to account for differences in the variability of a binomially distributed random variable (e.g. maximum variance at p=0.5).
  • Bottom middle: histogram of bootstrapped correlation coefficients for the correlation between residuals and performance level (same logic applies as in panel 2). Dotted lines denote 95% intervals of the sampled correlation coefficients, the solid line marks the observed correlation between model prediction and deviance residuals.
  • Top right: deviance residuals are plotted as a function of block index i.e. the sequence in which the data were acquired (WARNING: this graph can be properly interpreted only when stimulus intensities were fixed in separate blocks). If the observer was learning, the fitted linear correlation between residuals and block index should be positive.
  • Bottom right: histogram of bootstrapped correlation coefficients for the correlation between deviance residuals and block index (same logic applies as in panel 2 and 4).
In [13]:
priors = ( 'Gauss(0,5)', 'Gamma(1,3)', 'Beta(2,20)' )
  Hide output
In [14]:
mcmc = psi.BayesInference ( psidata, priors=priors, nafc=nafc )
  Hide output
Acceptance: 0.943333333333
Acceptance:
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in divide
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:60: RuntimeWarning: invalid value encountered in double_scalars
  fitted[i,j,k] = T[i,j,:].sum() * T[:,j,k].sum() / T[:,j,:].sum()
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )

 0.541666666667
Acceptance: 0.525
Acceptance: 0.506015960712

/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )

In [15]:
mcmc.estimate
  Hide output
Out[15]:
array([-1.63854678,  2.99021992,  0.0866207 ])
In [16]:
psi.ConvergenceMCMC ( mcmc )
  Hide output
/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.py:1330: RuntimeWarning: invalid value encountered in double_scalars
  B = n * sum( (psi_i-psi_mean)**2 ) / (m-1)

In [17]:
psi.GoodnessOfFit ( mcmc )
  Hide output
/usr/local/lib/python2.7/site-packages/matplotlib/lines.py:503: RuntimeWarning: invalid value encountered in greater_equal
  return np.alltrue(x[1:] - x[0:-1] >= 0)
/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.py:267: RuntimeWarning: invalid value encountered in greater_equal
  pval = N.mean( (simdata-observed)>=0 )

In [18]:
 psi.ParameterPlot ( mcmc )
  Hide output
Out[18]:
[<matplotlib.axes.Axes at 0x10cd42610>,
 <matplotlib.axes.Axes at 0x10ce1aa90>,
 <matplotlib.axes.Axes at 0x10cef8810>]
In [19]:
psi.plotInfluential(mcmc)
  Hide output
In [19]:
 
  Hide output
In []:
 
  Hide output

Testing basic functions of Motion Clouds

Motion Clouds: raw principles

Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.

In [6]:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output
In [7]:
def show_video(filename):
    """Embed a Motion Cloud movie from ``mc.figpath`` in the notebook.

    Parameters
    ----------
    filename : str
        File stem (no path, no extension); the movie is read from
        ``mc.figpath + filename + mc.vext`` and embedded as a
        base64-encoded HTML5 ``<video>`` tag.

    Raises
    ------
    IOError
        If the video file does not exist (same as the original behavior).
    """
    # `Image` was imported but never used in the original -- dropped
    from IPython.display import display, HTML
    from base64 import b64encode
    # context manager closes the file handle (the original leaked it)
    with open(mc.figpath + filename + mc.vext, "rb") as video_file:
        video = video_file.read()
    video_encoded = b64encode(video)
    video_tag = '<video controls  autoplay="autoplay" loop="loop" width=350px src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
  Hide output

Using Fourier ("official Motion Clouds")

In [8]:
import sys
sys.path.append('..')
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
  Hide output

Using mixtures of images

In [9]:
import scipy.misc
# `* 1.` promotes the integer image to float before normalization
# NOTE(review): scipy.misc.lena was deprecated/removed in later SciPy
# releases -- this cell assumes an old SciPy; TODO confirm environment
lena = scipy.misc.lena() * 1.
# standardize to zero mean, unit variance
lena -= lena.mean()
lena /= lena.std()
print lena.shape
  Hide output
(512, 512)

In [11]:
plt.imshow(lena, cmap=plt.cm.gray)
  Hide output
Out[11]:
<matplotlib.image.AxesImage at 0x104fdb450>
In [12]:
def noise(image=lena):
    """Return a 'noise' frame made by circularly shifting `image`.

    The image is rolled by an independent random offset along each of its
    two axes, which scrambles the phase structure while keeping the
    amplitude spectrum of the original picture.

    Parameters
    ----------
    image : ndarray, optional
        Source picture; defaults to the module-level normalized `lena`.

    Returns
    -------
    ndarray
        A shifted copy of `image` (the input is left untouched).
    """
    shifted = image
    for ax in (0, 1):
        offset = np.random.randint(shifted.shape[ax])
        shifted = np.roll(shifted, offset, axis=ax)
    return shifted
  Hide output
In [13]:
plt.imshow(noise(), cmap=plt.cm.gray)
  Hide output
Out[13]:
<matplotlib.image.AxesImage at 0x104f7bc50>
In [14]:
plt.imshow(noise(), cmap=plt.cm.gray)
  Hide output
Out[14]:
<matplotlib.image.AxesImage at 0x104ff2d90>

Using ARMA processes

Now, we define the ARMA process as an averaging process with a certain time constant $\tau=30.$ (in frames).

In [15]:
def ARMA(image, tau=30.):
    """One step of a leaky (exponential) average toward a fresh noise frame.

    Parameters
    ----------
    image : ndarray
        Current frame of the sequence.
    tau : float, optional
        Time constant of the averaging, in frames (default 30.).

    Returns
    -------
    ndarray
        The new frame: a (1 - 1/tau) fraction of the previous frame plus a
        1/tau fraction of a freshly shuffled noise image.

    NOTE(review): `noise()` is called with no argument, so each step draws
    from the module-level `lena` image -- this couples the function to
    global state; TODO consider passing the source image explicitly.
    """
    image = (1 - 1/tau)* image + 1/tau * noise()
    return image
  Hide output

initializing

In [17]:
image = ARMA(lena)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[17]:
<matplotlib.image.AxesImage at 0x106d75990>
In [18]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[18]:
<matplotlib.image.AxesImage at 0x1076aff90>
In [19]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[19]:
<matplotlib.image.AxesImage at 0x107d66490>
In [20]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[20]:
<matplotlib.image.AxesImage at 0x107d98950>
In [21]:
# Generate the full ARMA sequence frame by frame, starting from the current
# `image`, and save it as a movie.
# NOTE(review): with a 512x512 image and 1024 frames this float64 cube is
# roughly 2 GiB -- TODO confirm this fits the target machine's memory.
N_frame = 1024
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame): 
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
# frames are standardized (zero mean, unit std), so rescale into [0, 1]-ish
# range before saving
mc.anim_save(.5 + .5*z, filename='results/arma')
  Hide output
calculating  99% |############################################# | ETA:  0:00:00
Saving sequence results/arma.mp4

calculating 100% |##############################################| Time: 0:00:58

In [22]:
show_video(filename='arma')
  Hide output
In []:
 
  Hide output

Testing discrimination between Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

In []:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output

The experiment

In [6]:
#%pycat psychopy_discrimination.py
  Hide output

Analysis - version 1

In a first version of the experiment, we only stored the results in a numpy file.

In [7]:
!ls  data/discriminating_*
  Hide output
data/discriminating_john_Jan_23_1515.npy  data/discriminating_lup_Jan_23_1401.npy  data/discriminating_lup_Jan_24_0914.npy  data/discriminating_lup_Jan_24_0931.npy  data/discriminating_v2_lup_Feb_07_1409.pickle
data/discriminating_lup.pickle		  data/discriminating_lup_Jan_23_1735.npy  data/discriminating_lup_Jan_24_0919.npy  data/discriminating_lup_Jan_24_0937.npy  data/discriminating_v2_lup_Feb_07_1434.pickle

In [8]:
import glob
for fn in glob.glob('data/discriminating_*npy'):
    results = np.load(fn)
    print fn, results.shape
    print('analyzing results')
    print '# of trials :', numpy.abs(results).sum(axis=1)
    print 'average results: ', (results.sum(axis=1)/numpy.abs(results).sum(axis=1)*.5+.5)
  Hide output
data/discriminating_john_Jan_23_1515.npy (2, 50)
analyzing results
# of trials : [ 50.          24.50835188]
average results:  [ 0.48  1.  ]
data/discriminating_lup_Jan_23_1401.npy (2, 50)
analyzing results
# of trials : [ 50.          28.12570981]
average results:  [ 0.66  1.  ]
data/discriminating_lup_Jan_23_1735.npy (3, 50)
analyzing results
# of trials : [  9.  14.  13.]
average results:  [ 1.  1.  1.]
data/discriminating_lup_Jan_24_0914.npy (3, 50)
analyzing results
# of trials : [ 17.  21.  12.]
average results:  [ 0.64705882  0.85714286  1.        ]
data/discriminating_lup_Jan_24_0919.npy (3, 50)
analyzing results
# of trials : [ 10.  25.  15.]
average results:  [ 0.7         0.32        0.53333333]
data/discriminating_lup_Jan_24_0931.npy (7, 50)
analyzing results
# of trials : [  3.   4.   8.   8.   7.  12.   8.]
average results:  [ 0.66666667  1.          0.625       0.375       1.          0.16666667
  0.125     ]
data/discriminating_lup_Jan_24_0937.npy (7, 50)
analyzing results
# of trials : [  7.   5.   6.  12.  10.   2.   8.]
average results:  [ 0.85714286  0.8         0.83333333  0.41666667  0.2         1.          0.375     ]

In [9]:
%whos
  Hide output
Variable   Type       Data/Info
-------------------------------
fn         str        data/discriminating_lup_Jan_24_0937.npy
glob       module     <module 'glob' from '/usr<...>/lib/python2.7/glob.pyc'>
results    ndarray    7x50: 350 elems, type `float64`, 2800 bytes

Another solution is to use:

In [10]:
data= 1
np.savez('toto', data=data, results=results)
print np.load('toto.npz')['data']
  Hide output
1

or directly psychopy.misc:

In [11]:
from psychopy import misc
  Hide output
In [12]:
info = misc.fromFile('data/discriminating.pickle')
  Hide output
In [13]:
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
fileName = 'data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)
  Hide output

Analysis - version 2

In the second version, we stored more data:

In [14]:
import glob
from psychopy import misc
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    print fn, results.shape
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print ' alphas = ', alphas
    print '# of trials :', numpy.abs(data['results']).sum(axis=1)
    print 'average results: ', (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5)
    width = (alphas.max()-alphas.min())/len(alphas)
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
#    for i_trial in xrange(data['results'].shape[1]):
#        ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
  Hide output
data/discriminating_v2_lup_Feb_07_1409.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [ 10.   4.   5.  11.   6.  11.   3.]
average results:  [ 0.7         1.          0.4         0.54545455  0.83333333  0.27272727
  1.        ]
data/discriminating_v2_lup_Feb_07_1434.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [  4.   3.  10.  12.   8.   8.   5.]
average results:  [ 0.75        0.66666667  0.8         0.5         0.125       0.25        0.4       ]

Out[14]:
<matplotlib.text.Text at 0x10f75e850>
In [16]:
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0
  Hide output
In [15]:
  Hide output
    Share